static int vmx_do_page_fault(unsigned long va, unsigned long error_code)
{
- unsigned long eip, pfn;
- unsigned int index;
- unsigned long gpde = 0, gpte, gpa;
+ unsigned long eip;
+ unsigned long gpa;
int result;
struct exec_domain *ed = current;
#if VMX_DEBUG
{
va, eip, error_code);
}
#endif
- /*
- * Set up guest page directory cache to make linear_pt_table[] work.
- */
- __guest_get_l2e(ed, va, &gpde);
- if (!(gpde & _PAGE_PRESENT))
- return 0;
-
- index = (va >> L2_PAGETABLE_SHIFT);
- if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
- pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
- VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
- pagetable_val(ed->arch.pagetable));
-
- ed->arch.guest_pl2e_cache[index] =
- mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
- }
-
- if (unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[va >> PAGE_SHIFT])))
+ gpa = gva_to_gpa(va);
+ if (!gpa)
return 0;
-
- gpa = (gpte & PAGE_MASK) | (va & (PAGE_SIZE - 1));
if (mmio_space(gpa))
- handle_mmio(va, gpa);
+ handle_mmio(va, gpa);
if ((result = shadow_fault(va, error_code)))
return result;
memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE);
+ /* Not an MMIO/shadow fault we could resolve: report failure. */
+ return 0;
}
-inline unsigned long gva_to_gpa(unsigned long gva)
-{
- unsigned long gpde, gpte, pfn, index;
- struct exec_domain *ed = current;
-
- __guest_get_l2e(ed, gva, &gpde);
- index = (gva >> L2_PAGETABLE_SHIFT);
-
- pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
-
- ed->arch.guest_pl2e_cache[index] =
- mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-
- if ( unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[gva >> PAGE_SHIFT])) )
- {
- printk("gva_to_gpa EXIT: read gpte faulted" );
- return 0;
- }
-
- if ( !(gpte & _PAGE_PRESENT) )
- {
- printk("gva_to_gpa - EXIT: gpte not present (%lx)",gpte );
- return 0;
- }
-
- return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
-}
-
static void vmx_io_instruction(struct xen_regs *regs,
unsigned long exit_qualification, unsigned long inst_len)
{
}
if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
- if ( unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[guest_eip >> PAGE_SHIFT])) )
- {
- printk("inst_copy_from_guest- EXIT: read gpte faulted" );
- return 0;
- }
+ gpte = gva_to_gpte(guest_eip);
+ /*
+ * gva_to_gpte() returns 0 on a faulting lookup; a non-present
+ * gpte carries no valid frame number either, so bail out before
+ * feeding it to phys_to_machine_mapping().
+ */
+ if ( !(gpte & _PAGE_PRESENT) )
+ {
+ printk("inst_copy_from_guest- EXIT: gpte not present (%lx)", gpte);
+ return 0;
+ }
mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
inst_start = (unsigned char *)map_domain_mem(ma);
unmap_domain_mem(inst_start);
} else {
// Todo: In two page frames
+ BUG();
}
return inst_len;
ioreq_t *p;
struct mi_per_cpu_info *mpci_p;
struct xen_regs *inst_decoder_regs;
- extern inline unsigned long gva_to_gpa(unsigned long gva);
extern long evtchn_send(int lport);
extern long do_block(void);
}
-void handle_mmio(unsigned long va, unsigned long gpte, unsigned long gpa)
+void handle_mmio(unsigned long va, unsigned long gpa)
{
unsigned long eip;
unsigned long inst_len;
unmap_domain_mem(mpl2e);
}
+/*
+ * gva_to_gpte: look up the guest PTE that maps guest virtual address
+ * @gva for the current exec_domain.
+ *
+ * Returns the raw guest PTE value, or 0 when the guest L2 entry is not
+ * present or the PTE read through linear_pg_table[] faults.  NOTE: a
+ * return of 0 is indistinguishable from a genuinely zero PTE; callers
+ * must treat 0 (equivalently, !_PAGE_PRESENT) as "no mapping".
+ */
+static inline unsigned long gva_to_gpte(unsigned long gva)
+{
+ unsigned long gpde, gpte, pfn, index;
+ struct exec_domain *ed = current;
+
+ __guest_get_l2e(ed, gva, &gpde);
+ if (!(gpde & _PAGE_PRESENT))
+ return 0;
+
+ index = (gva >> L2_PAGETABLE_SHIFT);
+
+ /*
+ * Populate the per-domain L2 cache entry on first touch so the
+ * linear_pg_table[] access below can reach the guest page table.
+ */
+ if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
+ pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
+ ed->arch.guest_pl2e_cache[index] =
+ mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ }
+
+ /* Faulting read of the guest PTE: report "no mapping". */
+ if ( unlikely(__get_user(gpte, (unsigned long *)
+ &linear_pg_table[gva >> PAGE_SHIFT])) )
+ return 0;
+
+ return gpte;
+}
+
+/*
+ * gva_to_gpa: translate a guest virtual address to a guest physical
+ * address for the current exec_domain.
+ *
+ * Returns 0 when no present mapping exists.  NOTE(review): 0 is also a
+ * legal guest physical address (page 0), so callers cannot distinguish
+ * "no mapping" from a mapping of gpa 0 — confirm callers never need to.
+ */
+static inline unsigned long gva_to_gpa(unsigned long gva)
+{
+ unsigned long gpte;
+
+ gpte = gva_to_gpte(gva);
+ /* gva_to_gpte() returns 0 on failure; 0 also fails the present test. */
+ if ( !(gpte & _PAGE_PRESENT) )
+ return 0;
+
+ return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
+}
+
#endif /* CONFIG_VMX */
static inline void __shadow_mk_pagetable(struct exec_domain *ed)